Food 101

This is a deep learning project. This dataset has 101,000 images for 101 classes. In this project, I tried ResNet50 and DenseNet, and the final model is DenseNet121. Transfer Learning is utilized, and pretrained Model is based on Image Net, which has a good feature extract ability.

Traing Environment: Google Cloud Platform with Nvidia Tesla P100

Accuracy: Top1 84%, Top5 93%

Paper: Densely Connected Convolutional Networks

DataSet: Food101

1.1 Import Tools

In [1]:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'

import time
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt

import torch
from torch import nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms, models
from torch.utils.tensorboard import SummaryWriter

from autoaugment import ImageNetPolicy

# Use GPU if it's available
if torch.cuda.is_available():
    device = torch.device("cuda")
    torch.set_default_tensor_type(torch.cuda.FloatTensor)
else:
    device = torch.device("cpu")
    torch.set_default_tensor_type(torch.FloatTensor)
In [2]:
# Hyper-parameters
num_epochs = 100
learning_rate = 0.01
batch_size = 128

1.2 DataSet

In [3]:
data_path = r"/home/joezhou1994001_gmail_com/dataset/food-101"
data_path = r"/home/ec2-user/dataset/food-101"
data_path = r"/Volumes/SD/dataset/food-101"

# TODO: Define transforms for the training data and testing data
train_transforms = transforms.Compose([transforms.RandomRotation(30),
                                       transforms.RandomResizedCrop(224),
                                       transforms.RandomHorizontalFlip(), ImageNetPolicy(),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.485, 0.456, 0.406],
                                                            [0.229, 0.224, 0.225])])

test_transforms = transforms.Compose([transforms.Resize(255),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor(),
                                      transforms.Normalize([0.485, 0.456, 0.406],
                                                           [0.229, 0.224, 0.225])])

# Pass transforms in here, then run the next cell to see how the transforms look
train_data = datasets.ImageFolder(data_path + r'/train', transform=train_transforms)
val_data = datasets.ImageFolder(data_path + r'/val', transform=test_transforms)
test_data=datasets.ImageFolder(data_path + r'/test', transform=test_transforms)

train_loader = torch.utils.data.DataLoader(train_data, batch_size=128, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_data, batch_size=128, shuffle=True)
test_loader=torch.utils.data.DataLoader(test_data, batch_size=64, shuffle=True)

2.1 Choose Model

In [4]:
# DenseNet121
model =models.densenet121(pretrained=True)

# ResNet
# model =models.resnet(50)(pretrained=True)

# model

2.2 Model Validation

In [36]:
######################################################
# Run this block first time you run, 
# for validation purpose, make sure the model works.
######################################################


# Freeze parameters so we don't backprop through them
# 固定住已经训练好的参数
for param in model.parameters():
    param.requires_grad = False

# Edit the last full connection layer   1024 -> 500 -> 101
from collections import OrderedDict
classifier = nn.Sequential(OrderedDict([
                          ('fc1', nn.Linear(1024, 500)),
                          ('relu', nn.ReLU()),
                          ('fc2', nn.Linear(500, 101)),
                          ('output', nn.LogSoftmax(dim=1))
                          ]))

model.classifier = classifier
model.to(device)

criterion = nn.NLLLoss()
optimizer = torch.optim.Adam(model.classifier.parameters(), lr=0.001)

for ii, (inputs, labels) in enumerate(train_loader):
    # Move input and label tensors to the GPU
    inputs, labels = inputs.to(device), labels.to(device)

    start = time.time()

    outputs = model.forward(inputs)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()

    if ii==3:
        break

print(f"Device = {device}; Time per batch: {(time.time() - start)/3:.3f} seconds")
Device = cuda; Time per batch: 0.010 seconds

2.3 Adjust Model

Last layer of retrained model is [1024, 1000]. Here we have 101 classes, so change it.

In [5]:
#model = models.densenet201(pretrained=True)
model.to(device)

# Freeze parameters so we don't backprop through them
for param in model.parameters():
    param.requires_grad = False

model.classifier = nn.Sequential(
                                nn.Linear(1024,512), 
                                nn.LeakyReLU(), 
                                nn.Linear(512,256),
                                nn.LeakyReLU(), 
                                nn.Linear(256,101)
                            )

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.classifier.parameters(), lr=0.001, betas=[0.9, 0.999])

3.1 Train Model

In [6]:
from torch.utils.tensorboard import SummaryWriter

def train(num_epochs,train_loader,test_loader, resnet, optimizer, criterion):
    """
    Returns trained model
    """
    # Record Loss and Accuracy to tensorboard. Default logdir is runs
    writer = SummaryWriter()
    start = time.time()
    
    # initialize tracker for minimum validation loss.
    test_loss_min = np.Inf
    step = 0
    total_step = len(train_loader)
    for epoch in range(num_epochs):
        ################################################
        #
        #     Training Mode
        #
        ###############################################
        model.train()
        train_loss=0
        for i, data in enumerate(train_loader):
            inputs, labels = data[0].to(device), data[1].to(device)
  
            # Forward
            outputs = resnet(inputs)
            loss = criterion(outputs, labels)

            # Backward
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            
            # Loss/Accuracy
            train_loss += loss.item()
            _, argmax = torch.max(outputs, 1)
            accuracy = (labels == argmax.squeeze()).float().mean()
            
            # Print Train Accuracy/Loss every 100 steps.
            if (i+1) %100 == 0:     
                time_cost = time.time()-start
                print ('Train: Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Accuray: {}, Time:{}'.format(epoch+1, num_epochs, i+1, total_step, loss.item(), accuracy.item(), time_cost))
    
                writer.add_scalar('Loss/train', loss.item(), step)
                writer.add_scalar('Accuracy/train', accuracy, step)
                step += 100

        # Save the newest model. According to testing accuracy record. we can find the best model in history.
        torch.save(model.state_dict(), 'models/temp/food101_{}.pth'.format(time.time()))

        
        ################################################
        #
        #     Testing Mode
        #
        ###############################################
        model.eval()
        test_loss = 0
        test_accuracy = 0
        test_step = 0
        with torch.no_grad():
            for i, data in enumerate(test_loader):
                # For testing purpost, only calculate first 30 batches.
                # if i == 30: break
                images, labels = data[0].to(device), data[1].to(device)

                # Forward
                outputs = model(images)
                batch_loss = criterion(outputs, labels)
                test_loss += batch_loss.item()
                
                # Get topk predition. 预测中topk
                top_p, top_class = outputs.topk(5, dim=1)
                labels = labels.view(-1,1)        #  [n] 转为 [n, 1] , 以便与 [n, 5] 做比较    

                # Is right answer in topk?   判断topk中是否判断正确
                results =  top_class == labels # tensor([[False, False, False,  True, False],  [False,  True, False, False, False], ....)
                results = results.sum(1)          # tensor([1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1]), 每一行只要有一个为True就返回1, dim=1表示对维度1求和

                # Calculate average accuracy. 每一个batch有一个Accuracy, 最后除以batch数, 求平均
                test_accuracy += torch.mean(results.float()).item()

        # Avg Accuracy/Loss for each epoch
        avg_test_accuracy = test_accuracy/(i+1)
        avg_test_loss = test_loss/(i+1)
        time_cost = time.time()-start
        print ('Test: Epoch [{}/{}], Loss: {:.4f}, Accuray Top5: {:.3%}, Time:{}'.format(epoch+1, num_epochs, avg_test_loss, avg_test_accuracy, time_cost))

        # Record testing result in tensorboard.
        writer.add_scalar('Loss/test', avg_test_loss, epoch)
        writer.add_scalar('Accuracy/test', avg_test_accuracy, epoch)
        
        # Save the model with lower loss.
        if test_loss <= test_loss_min:
            print("Saved Model with Lower Loss")
            torch.save(model.state_dict(),'models/food101_less_loss.pth')
            test_loss_min=avg_test_loss

        # End Testing Mode
        model.train()
In [9]:
train(100, train_loader, test_loader, model, optimizer, criterion)
Train: Epoch [1/100], Step [100/474], Loss: 2.2092, Accuray: 0.453125, Time:545.367484331131
Train: Epoch [1/100], Step [200/474], Loss: 2.2136, Accuray: 0.453125, Time:1080.565059185028
Train: Epoch [1/100], Step [300/474], Loss: 1.9564, Accuray: 0.53125, Time:1614.4209067821503
Train: Epoch [1/100], Step [400/474], Loss: 2.5359, Accuray: 0.3671875, Time:2139.187469482422
Test: Epoch [1/100], Loss: 1.5376, Accuray Top5: 84.219%, Time:3281.716363430023
Saved Model with Lower Loss
Train: Epoch [2/100], Step [100/474], Loss: 2.2630, Accuray: 0.4453125, Time:3395.1930632591248
Train: Epoch [2/100], Step [200/474], Loss: 2.1683, Accuray: 0.4375, Time:3507.8141384124756
Train: Epoch [2/100], Step [300/474], Loss: 2.3438, Accuray: 0.40625, Time:3620.5777819156647
Train: Epoch [2/100], Step [400/474], Loss: 2.0748, Accuray: 0.5, Time:3733.498116016388
Test: Epoch [2/100], Loss: 1.5521, Accuray Top5: 83.955%, Time:3980.749472141266
Train: Epoch [3/100], Step [100/474], Loss: 1.8954, Accuray: 0.578125, Time:4092.636069059372
Train: Epoch [3/100], Step [200/474], Loss: 2.2844, Accuray: 0.421875, Time:4204.778349876404
Train: Epoch [3/100], Step [300/474], Loss: 2.2125, Accuray: 0.46875, Time:4316.981889486313
Train: Epoch [3/100], Step [400/474], Loss: 2.1962, Accuray: 0.4765625, Time:4428.826766490936
Test: Epoch [3/100], Loss: 1.5474, Accuray Top5: 83.919%, Time:4674.831803321838
Train: Epoch [4/100], Step [100/474], Loss: 2.5080, Accuray: 0.390625, Time:4786.085284233093
Train: Epoch [4/100], Step [200/474], Loss: 2.3959, Accuray: 0.4296875, Time:4898.40386390686
Train: Epoch [4/100], Step [300/474], Loss: 2.1583, Accuray: 0.453125, Time:5010.33123922348
Train: Epoch [4/100], Step [400/474], Loss: 2.1684, Accuray: 0.4609375, Time:5123.227366447449
Test: Epoch [4/100], Loss: 1.5559, Accuray Top5: 83.860%, Time:5369.396678209305
Train: Epoch [5/100], Step [100/474], Loss: 2.0720, Accuray: 0.4765625, Time:5482.382653236389
Train: Epoch [5/100], Step [200/474], Loss: 2.3724, Accuray: 0.4375, Time:5595.3631229400635
Train: Epoch [5/100], Step [300/474], Loss: 2.1678, Accuray: 0.4765625, Time:5708.1843338012695
Train: Epoch [5/100], Step [400/474], Loss: 2.2119, Accuray: 0.4921875, Time:5821.085914611816
Test: Epoch [5/100], Loss: 1.5655, Accuray Top5: 83.754%, Time:6068.745126247406
Train: Epoch [6/100], Step [100/474], Loss: 1.7827, Accuray: 0.53125, Time:6181.178681135178
Train: Epoch [6/100], Step [200/474], Loss: 2.1898, Accuray: 0.453125, Time:6293.843550443649
Train: Epoch [6/100], Step [300/474], Loss: 2.0021, Accuray: 0.5234375, Time:6407.116284132004
Train: Epoch [6/100], Step [400/474], Loss: 2.1036, Accuray: 0.4609375, Time:6519.223564863205
Test: Epoch [6/100], Loss: 1.5496, Accuray Top5: 84.157%, Time:6767.147189378738
Train: Epoch [7/100], Step [100/474], Loss: 2.0756, Accuray: 0.4921875, Time:6878.6027019023895
Train: Epoch [7/100], Step [200/474], Loss: 2.2960, Accuray: 0.46875, Time:6991.073362588882
Train: Epoch [7/100], Step [300/474], Loss: 2.0817, Accuray: 0.4609375, Time:7102.626349210739
Train: Epoch [7/100], Step [400/474], Loss: 2.4840, Accuray: 0.3984375, Time:7214.446965456009
Test: Epoch [7/100], Loss: 1.5360, Accuray Top5: 84.229%, Time:7459.154694080353
Train: Epoch [8/100], Step [100/474], Loss: 2.2757, Accuray: 0.4609375, Time:7570.603366613388
Train: Epoch [8/100], Step [200/474], Loss: 2.4580, Accuray: 0.3515625, Time:7681.827204465866
Train: Epoch [8/100], Step [300/474], Loss: 2.1919, Accuray: 0.4765625, Time:7793.497203588486
Train: Epoch [8/100], Step [400/474], Loss: 2.3985, Accuray: 0.3984375, Time:7904.359282255173
Test: Epoch [8/100], Loss: 1.5573, Accuray Top5: 84.007%, Time:8148.416377782822
Train: Epoch [9/100], Step [100/474], Loss: 1.9459, Accuray: 0.4609375, Time:8259.353439331055
Train: Epoch [9/100], Step [200/474], Loss: 2.2169, Accuray: 0.453125, Time:8370.771568059921
Train: Epoch [9/100], Step [300/474], Loss: 2.0998, Accuray: 0.4609375, Time:8481.601388692856
Train: Epoch [9/100], Step [400/474], Loss: 2.4196, Accuray: 0.4375, Time:8593.234732866287
Test: Epoch [9/100], Loss: 1.5553, Accuray Top5: 83.902%, Time:8837.345322847366
Train: Epoch [10/100], Step [100/474], Loss: 2.3571, Accuray: 0.4453125, Time:8948.85761809349
Train: Epoch [10/100], Step [200/474], Loss: 2.3648, Accuray: 0.4375, Time:9059.946095705032
Train: Epoch [10/100], Step [300/474], Loss: 2.0622, Accuray: 0.4921875, Time:9171.406403303146
Train: Epoch [10/100], Step [400/474], Loss: 2.4898, Accuray: 0.421875, Time:9282.548441886902
Test: Epoch [10/100], Loss: 1.5641, Accuray Top5: 83.862%, Time:9527.27408337593
Train: Epoch [11/100], Step [100/474], Loss: 2.0571, Accuray: 0.53125, Time:9638.48413348198
Train: Epoch [11/100], Step [200/474], Loss: 2.0366, Accuray: 0.5078125, Time:9749.953535318375
Train: Epoch [11/100], Step [300/474], Loss: 1.9701, Accuray: 0.5, Time:9861.159670114517
Train: Epoch [11/100], Step [400/474], Loss: 1.9448, Accuray: 0.5078125, Time:9972.483085632324
Test: Epoch [11/100], Loss: 1.5370, Accuray Top5: 84.168%, Time:10216.353817462921
Train: Epoch [12/100], Step [100/474], Loss: 2.0315, Accuray: 0.4921875, Time:10327.24104475975
Train: Epoch [12/100], Step [200/474], Loss: 2.0116, Accuray: 0.5, Time:10438.340398073196
Train: Epoch [12/100], Step [300/474], Loss: 2.0663, Accuray: 0.4453125, Time:10549.971930027008
Train: Epoch [12/100], Step [400/474], Loss: 2.1058, Accuray: 0.3984375, Time:10660.566868543625
Test: Epoch [12/100], Loss: 1.5585, Accuray Top5: 83.844%, Time:10904.504984855652
Train: Epoch [13/100], Step [100/474], Loss: 2.1353, Accuray: 0.4921875, Time:11015.058251142502
Train: Epoch [13/100], Step [200/474], Loss: 2.0530, Accuray: 0.46875, Time:11126.266103029251
Train: Epoch [13/100], Step [300/474], Loss: 1.8649, Accuray: 0.484375, Time:11237.133161783218
Train: Epoch [13/100], Step [400/474], Loss: 1.9007, Accuray: 0.5078125, Time:11348.38438653946
Test: Epoch [13/100], Loss: 1.5371, Accuray Top5: 84.270%, Time:11592.961667776108
Train: Epoch [14/100], Step [100/474], Loss: 2.1563, Accuray: 0.484375, Time:11704.192947149277
Train: Epoch [14/100], Step [200/474], Loss: 2.1066, Accuray: 0.46875, Time:11815.103618144989
Train: Epoch [14/100], Step [300/474], Loss: 2.2460, Accuray: 0.4375, Time:11926.255444526672
Train: Epoch [14/100], Step [400/474], Loss: 2.1486, Accuray: 0.5234375, Time:12036.751095294952
Test: Epoch [14/100], Loss: 1.5408, Accuray Top5: 84.245%, Time:12280.657790184021
Train: Epoch [15/100], Step [100/474], Loss: 2.2585, Accuray: 0.40625, Time:12391.36250281334
Train: Epoch [15/100], Step [200/474], Loss: 1.9913, Accuray: 0.5078125, Time:12502.473951816559
Train: Epoch [15/100], Step [300/474], Loss: 1.9489, Accuray: 0.4921875, Time:12613.286772966385
Train: Epoch [15/100], Step [400/474], Loss: 2.0711, Accuray: 0.5078125, Time:12724.903625249863
Test: Epoch [15/100], Loss: 1.5312, Accuray Top5: 84.129%, Time:12969.141369342804
Train: Epoch [16/100], Step [100/474], Loss: 2.2806, Accuray: 0.40625, Time:13080.411165475845
Train: Epoch [16/100], Step [200/474], Loss: 2.2522, Accuray: 0.484375, Time:13191.137923002243
Train: Epoch [16/100], Step [300/474], Loss: 2.1497, Accuray: 0.4765625, Time:13302.144077777863
Train: Epoch [16/100], Step [400/474], Loss: 2.0758, Accuray: 0.5, Time:13412.723506212234
Test: Epoch [16/100], Loss: 1.5352, Accuray Top5: 83.920%, Time:13657.21586728096
Train: Epoch [17/100], Step [100/474], Loss: 2.2223, Accuray: 0.4765625, Time:13768.54148030281
Train: Epoch [17/100], Step [200/474], Loss: 2.0154, Accuray: 0.4453125, Time:13879.596536636353
Train: Epoch [17/100], Step [300/474], Loss: 2.1444, Accuray: 0.515625, Time:13990.642752408981
Train: Epoch [17/100], Step [400/474], Loss: 2.3340, Accuray: 0.4609375, Time:14102.086283683777
Test: Epoch [17/100], Loss: 1.5486, Accuray Top5: 84.176%, Time:14346.378109931946
Train: Epoch [18/100], Step [100/474], Loss: 2.4310, Accuray: 0.3984375, Time:14457.616476297379
Train: Epoch [18/100], Step [200/474], Loss: 2.0438, Accuray: 0.4453125, Time:14568.562551021576
Train: Epoch [18/100], Step [300/474], Loss: 2.1322, Accuray: 0.4921875, Time:14679.599018573761
Train: Epoch [18/100], Step [400/474], Loss: 2.2007, Accuray: 0.4765625, Time:14790.328458070755
Test: Epoch [18/100], Loss: 1.5660, Accuray Top5: 83.886%, Time:15034.784904956818
Train: Epoch [19/100], Step [100/474], Loss: 1.8756, Accuray: 0.5234375, Time:15145.674757003784
Train: Epoch [19/100], Step [200/474], Loss: 2.1428, Accuray: 0.484375, Time:15257.256936788559
Train: Epoch [19/100], Step [300/474], Loss: 2.2422, Accuray: 0.46875, Time:15367.569503068924
Train: Epoch [19/100], Step [400/474], Loss: 1.9930, Accuray: 0.4921875, Time:15478.748666286469
Test: Epoch [19/100], Loss: 1.5515, Accuray Top5: 84.086%, Time:15721.953993558884
Train: Epoch [20/100], Step [100/474], Loss: 2.0082, Accuray: 0.515625, Time:15832.904603719711
Train: Epoch [20/100], Step [200/474], Loss: 2.1381, Accuray: 0.5, Time:15943.420501947403
Train: Epoch [20/100], Step [300/474], Loss: 2.2798, Accuray: 0.5078125, Time:16054.247993946075
Train: Epoch [20/100], Step [400/474], Loss: 2.4338, Accuray: 0.4296875, Time:16164.954780101776
Test: Epoch [20/100], Loss: 1.5451, Accuray Top5: 83.937%, Time:16408.742777585983
Train: Epoch [21/100], Step [100/474], Loss: 1.9119, Accuray: 0.5078125, Time:16519.574125289917
Train: Epoch [21/100], Step [200/474], Loss: 2.4525, Accuray: 0.4296875, Time:16630.67910385132
Train: Epoch [21/100], Step [300/474], Loss: 1.9652, Accuray: 0.53125, Time:16741.47596001625
Train: Epoch [21/100], Step [400/474], Loss: 2.1397, Accuray: 0.5, Time:16852.434773683548
Test: Epoch [21/100], Loss: 1.5331, Accuray Top5: 84.333%, Time:17095.99574136734
Train: Epoch [22/100], Step [100/474], Loss: 2.1779, Accuray: 0.453125, Time:17207.105558872223
Train: Epoch [22/100], Step [200/474], Loss: 1.9771, Accuray: 0.5078125, Time:17317.82376885414
Train: Epoch [22/100], Step [300/474], Loss: 2.3170, Accuray: 0.4375, Time:17429.043103933334
Train: Epoch [22/100], Step [400/474], Loss: 2.0797, Accuray: 0.4296875, Time:17539.573291301727
Test: Epoch [22/100], Loss: 1.5391, Accuray Top5: 84.238%, Time:17783.22416114807
Train: Epoch [23/100], Step [100/474], Loss: 2.2595, Accuray: 0.421875, Time:17893.606512784958
Train: Epoch [23/100], Step [200/474], Loss: 2.1707, Accuray: 0.4453125, Time:18004.597441911697
Train: Epoch [23/100], Step [300/474], Loss: 2.5147, Accuray: 0.40625, Time:18114.919152498245
Train: Epoch [23/100], Step [400/474], Loss: 2.0449, Accuray: 0.53125, Time:18226.089134454727
Test: Epoch [23/100], Loss: 1.5329, Accuray Top5: 84.368%, Time:18470.101070404053
Train: Epoch [24/100], Step [100/474], Loss: 2.4252, Accuray: 0.4453125, Time:18581.20978832245
Train: Epoch [24/100], Step [200/474], Loss: 2.0762, Accuray: 0.453125, Time:18691.70954799652
Train: Epoch [24/100], Step [300/474], Loss: 1.9764, Accuray: 0.484375, Time:18803.01273417473
Train: Epoch [24/100], Step [400/474], Loss: 2.2789, Accuray: 0.4765625, Time:18913.722131490707
Test: Epoch [24/100], Loss: 1.5564, Accuray Top5: 84.080%, Time:19157.176887750626
Train: Epoch [25/100], Step [100/474], Loss: 2.2723, Accuray: 0.4453125, Time:19267.922996759415
Train: Epoch [25/100], Step [200/474], Loss: 2.0439, Accuray: 0.4765625, Time:19378.82658314705
Train: Epoch [25/100], Step [300/474], Loss: 2.5285, Accuray: 0.40625, Time:19489.651166677475
Train: Epoch [25/100], Step [400/474], Loss: 1.9518, Accuray: 0.5, Time:19601.355087041855
Test: Epoch [25/100], Loss: 1.5450, Accuray Top5: 84.321%, Time:19846.10639333725
Train: Epoch [26/100], Step [100/474], Loss: 2.0753, Accuray: 0.4453125, Time:19957.721410274506
Train: Epoch [26/100], Step [200/474], Loss: 2.0213, Accuray: 0.5, Time:20068.99129128456
Train: Epoch [26/100], Step [300/474], Loss: 2.4929, Accuray: 0.3828125, Time:20180.69574689865
Train: Epoch [26/100], Step [400/474], Loss: 2.0984, Accuray: 0.453125, Time:20291.650700092316
Test: Epoch [26/100], Loss: 1.5418, Accuray Top5: 84.252%, Time:20536.4169857502
Train: Epoch [27/100], Step [100/474], Loss: 1.9900, Accuray: 0.515625, Time:20645.81082892418
Train: Epoch [27/100], Step [200/474], Loss: 2.0796, Accuray: 0.4609375, Time:20755.275408744812
Train: Epoch [27/100], Step [300/474], Loss: 2.0584, Accuray: 0.5, Time:20863.727331638336
Train: Epoch [27/100], Step [400/474], Loss: 1.9119, Accuray: 0.5078125, Time:20973.090785980225
Test: Epoch [27/100], Loss: 1.5405, Accuray Top5: 84.141%, Time:21215.688612937927
Train: Epoch [28/100], Step [100/474], Loss: 2.1150, Accuray: 0.484375, Time:21326.51093029976
Train: Epoch [28/100], Step [200/474], Loss: 2.0581, Accuray: 0.453125, Time:21437.690863132477
Train: Epoch [28/100], Step [300/474], Loss: 2.2921, Accuray: 0.421875, Time:21548.56617331505
Train: Epoch [28/100], Step [400/474], Loss: 2.1477, Accuray: 0.4609375, Time:21660.650354862213
Test: Epoch [28/100], Loss: 1.5371, Accuray Top5: 84.310%, Time:21906.436503887177
Train: Epoch [29/100], Step [100/474], Loss: 2.0127, Accuray: 0.5078125, Time:22018.91152048111
Train: Epoch [29/100], Step [200/474], Loss: 2.2435, Accuray: 0.453125, Time:22131.07711148262
Train: Epoch [29/100], Step [300/474], Loss: 2.1059, Accuray: 0.4609375, Time:22242.737766742706
Train: Epoch [29/100], Step [400/474], Loss: 2.3943, Accuray: 0.4140625, Time:22355.045744895935
Test: Epoch [29/100], Loss: 1.5297, Accuray Top5: 84.311%, Time:22600.71276307106
Train: Epoch [30/100], Step [100/474], Loss: 2.1669, Accuray: 0.5078125, Time:22712.598845005035
Train: Epoch [30/100], Step [200/474], Loss: 1.7987, Accuray: 0.5390625, Time:22824.779207229614
Train: Epoch [30/100], Step [300/474], Loss: 2.2168, Accuray: 0.4765625, Time:22936.23351430893
Train: Epoch [30/100], Step [400/474], Loss: 2.1080, Accuray: 0.4609375, Time:23048.34866976738
Test: Epoch [30/100], Loss: 1.5552, Accuray Top5: 84.167%, Time:23294.359250307083
Train: Epoch [31/100], Step [100/474], Loss: 2.2193, Accuray: 0.4296875, Time:23406.395131111145
Train: Epoch [31/100], Step [200/474], Loss: 2.1403, Accuray: 0.4765625, Time:23518.552308797836
Train: Epoch [31/100], Step [300/474], Loss: 2.3332, Accuray: 0.421875, Time:23631.349756717682
Train: Epoch [31/100], Step [400/474], Loss: 2.2630, Accuray: 0.453125, Time:23743.514258623123
Test: Epoch [31/100], Loss: 1.5325, Accuray Top5: 84.533%, Time:23989.852831363678
Train: Epoch [32/100], Step [100/474], Loss: 2.0681, Accuray: 0.4921875, Time:24101.65410900116
Train: Epoch [32/100], Step [200/474], Loss: 2.2110, Accuray: 0.40625, Time:24214.0582010746
Train: Epoch [32/100], Step [300/474], Loss: 2.0825, Accuray: 0.4765625, Time:24325.920860290527
Train: Epoch [32/100], Step [400/474], Loss: 2.1098, Accuray: 0.4921875, Time:24438.6230199337
Test: Epoch [32/100], Loss: 1.5464, Accuray Top5: 84.284%, Time:24689.398778438568
Train: Epoch [33/100], Step [100/474], Loss: 2.1809, Accuray: 0.4296875, Time:24803.789471149445
Train: Epoch [33/100], Step [200/474], Loss: 2.2671, Accuray: 0.40625, Time:24917.36795258522
Train: Epoch [33/100], Step [300/474], Loss: 2.4051, Accuray: 0.421875, Time:25031.265622615814
Train: Epoch [33/100], Step [400/474], Loss: 2.1357, Accuray: 0.453125, Time:25144.58429503441
Test: Epoch [33/100], Loss: 1.5435, Accuray Top5: 84.318%, Time:25395.78115773201
Train: Epoch [34/100], Step [100/474], Loss: 1.9412, Accuray: 0.5, Time:25508.86887073517
Train: Epoch [34/100], Step [200/474], Loss: 1.8658, Accuray: 0.5234375, Time:25622.776985406876
Train: Epoch [34/100], Step [300/474], Loss: 2.2258, Accuray: 0.5, Time:25736.486234903336
Train: Epoch [34/100], Step [400/474], Loss: 2.1219, Accuray: 0.4453125, Time:25850.586701631546
Test: Epoch [34/100], Loss: 1.5526, Accuray Top5: 83.919%, Time:26099.030680656433
Train: Epoch [35/100], Step [100/474], Loss: 1.7826, Accuray: 0.5703125, Time:26211.948581695557
Train: Epoch [35/100], Step [200/474], Loss: 2.3204, Accuray: 0.453125, Time:26324.880024433136
Train: Epoch [35/100], Step [300/474], Loss: 2.1102, Accuray: 0.4375, Time:26437.53804707527
Train: Epoch [35/100], Step [400/474], Loss: 2.2392, Accuray: 0.484375, Time:26550.19703388214
Test: Epoch [35/100], Loss: 1.5433, Accuray Top5: 84.138%, Time:26797.461314678192
Train: Epoch [36/100], Step [100/474], Loss: 2.1833, Accuray: 0.4140625, Time:26909.609550237656
Train: Epoch [36/100], Step [200/474], Loss: 1.8169, Accuray: 0.5078125, Time:27022.55230164528
Train: Epoch [36/100], Step [300/474], Loss: 2.1440, Accuray: 0.40625, Time:27134.26833629608
Train: Epoch [36/100], Step [400/474], Loss: 1.7852, Accuray: 0.5625, Time:27246.31538438797
Test: Epoch [36/100], Loss: 1.5486, Accuray Top5: 83.966%, Time:27491.624095201492
Train: Epoch [37/100], Step [100/474], Loss: 1.9501, Accuray: 0.53125, Time:27603.412558555603
Train: Epoch [37/100], Step [200/474], Loss: 2.1477, Accuray: 0.4765625, Time:27714.760132551193
Train: Epoch [37/100], Step [300/474], Loss: 2.2471, Accuray: 0.46875, Time:27826.624513864517
Train: Epoch [37/100], Step [400/474], Loss: 2.0698, Accuray: 0.5078125, Time:27938.198852300644
Test: Epoch [37/100], Loss: 1.5486, Accuray Top5: 83.930%, Time:28185.477843999863
Train: Epoch [38/100], Step [100/474], Loss: 2.0167, Accuray: 0.53125, Time:28297.92029929161
Train: Epoch [38/100], Step [200/474], Loss: 2.2052, Accuray: 0.5, Time:28410.147056102753
Train: Epoch [38/100], Step [300/474], Loss: 2.0906, Accuray: 0.4140625, Time:28522.46834206581
Train: Epoch [38/100], Step [400/474], Loss: 2.1424, Accuray: 0.4765625, Time:28634.86826324463
Test: Epoch [38/100], Loss: 1.5616, Accuray Top5: 83.985%, Time:28883.722232103348
Train: Epoch [39/100], Step [100/474], Loss: 1.8370, Accuray: 0.5234375, Time:29001.790465831757
Train: Epoch [39/100], Step [200/474], Loss: 2.3335, Accuray: 0.4375, Time:29119.850224256516
Train: Epoch [39/100], Step [300/474], Loss: 2.4323, Accuray: 0.40625, Time:29238.8295712471
Train: Epoch [39/100], Step [400/474], Loss: 2.4174, Accuray: 0.375, Time:29356.88758611679
Test: Epoch [39/100], Loss: 1.5466, Accuray Top5: 84.307%, Time:29616.49042081833
Train: Epoch [40/100], Step [100/474], Loss: 2.3523, Accuray: 0.4765625, Time:29733.335287809372
Train: Epoch [40/100], Step [200/474], Loss: 1.9521, Accuray: 0.53125, Time:29848.724090576172
Train: Epoch [40/100], Step [300/474], Loss: 2.1131, Accuray: 0.4765625, Time:29964.36598777771
Train: Epoch [40/100], Step [400/474], Loss: 2.2765, Accuray: 0.453125, Time:30080.798176050186
Test: Epoch [40/100], Loss: 1.5532, Accuray Top5: 84.116%, Time:30334.630036354065
Train: Epoch [41/100], Step [100/474], Loss: 2.4898, Accuray: 0.3203125, Time:30449.711794376373
Train: Epoch [41/100], Step [200/474], Loss: 1.9881, Accuray: 0.46875, Time:30564.315952301025
Train: Epoch [41/100], Step [300/474], Loss: 1.9682, Accuray: 0.484375, Time:30679.08065223694
Train: Epoch [41/100], Step [400/474], Loss: 2.3099, Accuray: 0.4921875, Time:30792.885321617126
Test: Epoch [41/100], Loss: 1.5666, Accuray Top5: 83.609%, Time:31043.38093304634
Train: Epoch [42/100], Step [100/474], Loss: 2.2314, Accuray: 0.4296875, Time:31158.374940395355
Train: Epoch [42/100], Step [200/474], Loss: 2.1410, Accuray: 0.4609375, Time:31272.595238685608
Train: Epoch [42/100], Step [300/474], Loss: 2.3300, Accuray: 0.421875, Time:31386.85727763176
Train: Epoch [42/100], Step [400/474], Loss: 2.1792, Accuray: 0.453125, Time:31501.411850214005
Test: Epoch [42/100], Loss: 1.5510, Accuray Top5: 84.359%, Time:31751.214235067368
Train: Epoch [43/100], Step [100/474], Loss: 2.0016, Accuray: 0.4921875, Time:31864.022988796234
Train: Epoch [43/100], Step [200/474], Loss: 2.1345, Accuray: 0.4296875, Time:31978.165506839752
Train: Epoch [43/100], Step [300/474], Loss: 2.2551, Accuray: 0.4453125, Time:32092.378796339035
Train: Epoch [43/100], Step [400/474], Loss: 2.1879, Accuray: 0.4375, Time:32206.427614688873
Test: Epoch [43/100], Loss: 1.5399, Accuray Top5: 84.415%, Time:32456.889922380447
Train: Epoch [44/100], Step [100/474], Loss: 2.1610, Accuray: 0.4375, Time:32570.580847024918
Train: Epoch [44/100], Step [200/474], Loss: 2.0806, Accuray: 0.484375, Time:32684.435611963272
Train: Epoch [44/100], Step [300/474], Loss: 1.9289, Accuray: 0.53125, Time:32797.45775747299
Train: Epoch [44/100], Step [400/474], Loss: 2.4251, Accuray: 0.390625, Time:32910.9806497097
Test: Epoch [44/100], Loss: 1.5346, Accuray Top5: 84.472%, Time:33160.99443936348
Train: Epoch [45/100], Step [100/474], Loss: 2.2175, Accuray: 0.453125, Time:33275.490369558334
Train: Epoch [45/100], Step [200/474], Loss: 2.3796, Accuray: 0.4296875, Time:33389.710555553436
Train: Epoch [45/100], Step [300/474], Loss: 1.8571, Accuray: 0.5078125, Time:33504.619673490524
Train: Epoch [45/100], Step [400/474], Loss: 1.7517, Accuray: 0.6015625, Time:33618.823279857635
Test: Epoch [45/100], Loss: 1.5343, Accuray Top5: 84.372%, Time:33867.73762845993
Train: Epoch [46/100], Step [100/474], Loss: 2.1482, Accuray: 0.4921875, Time:33981.04414391518
Train: Epoch [46/100], Step [200/474], Loss: 1.9877, Accuray: 0.515625, Time:34095.123354911804
Train: Epoch [46/100], Step [300/474], Loss: 2.0480, Accuray: 0.4375, Time:34208.27761888504
Train: Epoch [46/100], Step [400/474], Loss: 2.2761, Accuray: 0.40625, Time:34322.28577566147
Test: Epoch [46/100], Loss: 1.5385, Accuray Top5: 84.435%, Time:34574.066950559616
Train: Epoch [47/100], Step [100/474], Loss: 2.1828, Accuray: 0.4609375, Time:34688.40827989578
Train: Epoch [47/100], Step [200/474], Loss: 1.9520, Accuray: 0.5078125, Time:34803.318652153015
Train: Epoch [47/100], Step [300/474], Loss: 1.9787, Accuray: 0.453125, Time:34918.12555217743
Train: Epoch [47/100], Step [400/474], Loss: 1.9072, Accuray: 0.5078125, Time:35033.80590939522
Test: Epoch [47/100], Loss: 1.5655, Accuray Top5: 83.664%, Time:35287.414496421814
Train: Epoch [48/100], Step [100/474], Loss: 1.9536, Accuray: 0.5234375, Time:35402.93133687973
Train: Epoch [48/100], Step [200/474], Loss: 2.0993, Accuray: 0.4921875, Time:35518.73789668083
Train: Epoch [48/100], Step [300/474], Loss: 2.0402, Accuray: 0.4765625, Time:35633.65640282631
Train: Epoch [48/100], Step [400/474], Loss: 2.0565, Accuray: 0.53125, Time:35748.369837999344
Test: Epoch [48/100], Loss: 1.5415, Accuray Top5: 84.082%, Time:35999.63584804535
Train: Epoch [49/100], Step [100/474], Loss: 2.3205, Accuray: 0.421875, Time:36114.00064969063
Train: Epoch [49/100], Step [200/474], Loss: 2.2260, Accuray: 0.4921875, Time:36227.527983665466
Train: Epoch [49/100], Step [300/474], Loss: 1.8481, Accuray: 0.4609375, Time:36341.87572264671
Train: Epoch [49/100], Step [400/474], Loss: 2.1020, Accuray: 0.5, Time:36455.69117617607
Test: Epoch [49/100], Loss: 1.5334, Accuray Top5: 84.410%, Time:36706.66602039337
Train: Epoch [50/100], Step [100/474], Loss: 2.0720, Accuray: 0.46875, Time:36820.874403715134
Train: Epoch [50/100], Step [200/474], Loss: 2.2533, Accuray: 0.4140625, Time:36935.11421585083
Train: Epoch [50/100], Step [300/474], Loss: 1.9914, Accuray: 0.5078125, Time:37049.039452552795
Train: Epoch [50/100], Step [400/474], Loss: 2.0946, Accuray: 0.4765625, Time:37162.91595983505
Test: Epoch [50/100], Loss: 1.5400, Accuray Top5: 84.043%, Time:37414.823845624924
Train: Epoch [51/100], Step [100/474], Loss: 2.3877, Accuray: 0.4296875, Time:37529.90231227875
Train: Epoch [51/100], Step [200/474], Loss: 2.0379, Accuray: 0.53125, Time:37645.16791343689
Train: Epoch [51/100], Step [300/474], Loss: 2.2449, Accuray: 0.4375, Time:37761.348773002625
Train: Epoch [51/100], Step [400/474], Loss: 1.9806, Accuray: 0.515625, Time:37876.62848567963
Test: Epoch [51/100], Loss: 1.5259, Accuray Top5: 84.649%, Time:38128.81600356102
Train: Epoch [52/100], Step [100/474], Loss: 1.7295, Accuray: 0.484375, Time:38242.855019807816
Train: Epoch [52/100], Step [200/474], Loss: 2.1298, Accuray: 0.4921875, Time:38357.95206952095
Train: Epoch [52/100], Step [300/474], Loss: 2.1786, Accuray: 0.4765625, Time:38473.3715338707
Train: Epoch [52/100], Step [400/474], Loss: 2.3489, Accuray: 0.4296875, Time:38588.940123558044
Test: Epoch [52/100], Loss: 1.5494, Accuray Top5: 84.045%, Time:38840.14168405533
Train: Epoch [53/100], Step [100/474], Loss: 2.3097, Accuray: 0.390625, Time:38954.5298409462
Train: Epoch [53/100], Step [200/474], Loss: 2.5303, Accuray: 0.3671875, Time:39070.029703617096
Train: Epoch [53/100], Step [300/474], Loss: 1.8860, Accuray: 0.546875, Time:39183.864750146866
Train: Epoch [53/100], Step [400/474], Loss: 1.7989, Accuray: 0.546875, Time:39297.84558534622
Test: Epoch [53/100], Loss: 1.5322, Accuray Top5: 84.330%, Time:39551.43840551376
Train: Epoch [54/100], Step [100/474], Loss: 2.0986, Accuray: 0.484375, Time:39665.883420705795
Train: Epoch [54/100], Step [200/474], Loss: 2.1708, Accuray: 0.5, Time:39780.12433099747
Train: Epoch [54/100], Step [300/474], Loss: 2.1799, Accuray: 0.453125, Time:39893.98636460304
Train: Epoch [54/100], Step [400/474], Loss: 2.0468, Accuray: 0.4921875, Time:40007.118894815445
Test: Epoch [54/100], Loss: 1.5282, Accuray Top5: 84.563%, Time:40256.62089705467
Train: Epoch [55/100], Step [100/474], Loss: 2.0626, Accuray: 0.484375, Time:40369.76272249222
Train: Epoch [55/100], Step [200/474], Loss: 2.1050, Accuray: 0.453125, Time:40482.58551669121
Train: Epoch [55/100], Step [300/474], Loss: 2.2253, Accuray: 0.4375, Time:40595.61470580101
Train: Epoch [55/100], Step [400/474], Loss: 2.0323, Accuray: 0.53125, Time:40708.55739068985
Test: Epoch [55/100], Loss: 1.5423, Accuray Top5: 84.240%, Time:40958.06192588806
Train: Epoch [56/100], Step [100/474], Loss: 1.9444, Accuray: 0.515625, Time:41071.23809957504
Train: Epoch [56/100], Step [200/474], Loss: 2.0499, Accuray: 0.4765625, Time:41183.5567278862
Train: Epoch [56/100], Step [300/474], Loss: 2.2172, Accuray: 0.4453125, Time:41295.99663901329
Train: Epoch [56/100], Step [400/474], Loss: 2.0745, Accuray: 0.4765625, Time:41410.600090026855
Test: Epoch [56/100], Loss: 1.5476, Accuray Top5: 84.182%, Time:41662.54051399231
Train: Epoch [57/100], Step [100/474], Loss: 2.3316, Accuray: 0.453125, Time:41776.44248461723
Train: Epoch [57/100], Step [200/474], Loss: 2.2316, Accuray: 0.421875, Time:41891.40840435028
Train: Epoch [57/100], Step [300/474], Loss: 2.3004, Accuray: 0.4140625, Time:42005.20432925224
Train: Epoch [57/100], Step [400/474], Loss: 2.1374, Accuray: 0.4296875, Time:42119.23950123787
Test: Epoch [57/100], Loss: 1.5640, Accuray Top5: 83.884%, Time:42368.51714372635
Train: Epoch [58/100], Step [100/474], Loss: 2.0827, Accuray: 0.5, Time:42482.13016915321
Train: Epoch [58/100], Step [200/474], Loss: 1.6301, Accuray: 0.53125, Time:42594.71694135666
Train: Epoch [58/100], Step [300/474], Loss: 2.1659, Accuray: 0.453125, Time:42707.17266654968
Train: Epoch [58/100], Step [400/474], Loss: 2.0307, Accuray: 0.4609375, Time:42819.64830136299
Test: Epoch [58/100], Loss: 1.5508, Accuray Top5: 84.062%, Time:43066.29175853729
Train: Epoch [59/100], Step [100/474], Loss: 1.9108, Accuray: 0.4765625, Time:43178.252930641174
Train: Epoch [59/100], Step [200/474], Loss: 2.2662, Accuray: 0.515625, Time:43289.81544017792
Train: Epoch [59/100], Step [300/474], Loss: 2.1131, Accuray: 0.5, Time:43402.18579125404
Train: Epoch [59/100], Step [400/474], Loss: 2.1706, Accuray: 0.4375, Time:43514.14139533043
Test: Epoch [59/100], Loss: 1.5648, Accuray Top5: 84.095%, Time:43760.37333369255
Train: Epoch [60/100], Step [100/474], Loss: 1.8111, Accuray: 0.5078125, Time:43872.352595090866
Train: Epoch [60/100], Step [200/474], Loss: 2.1489, Accuray: 0.453125, Time:43984.14572381973
Train: Epoch [60/100], Step [300/474], Loss: 2.2144, Accuray: 0.46875, Time:44095.73403739929
Train: Epoch [60/100], Step [400/474], Loss: 2.2364, Accuray: 0.4375, Time:44207.235766887665
Test: Epoch [60/100], Loss: 1.5248, Accuray Top5: 84.343%, Time:44453.138202667236
Train: Epoch [61/100], Step [100/474], Loss: 1.9432, Accuray: 0.4921875, Time:44564.61178255081
Train: Epoch [61/100], Step [200/474], Loss: 2.1834, Accuray: 0.484375, Time:44676.35194897652
Train: Epoch [61/100], Step [300/474], Loss: 1.9878, Accuray: 0.4765625, Time:44788.10416054726
Train: Epoch [61/100], Step [400/474], Loss: 2.0846, Accuray: 0.4453125, Time:44899.30825686455
Test: Epoch [61/100], Loss: 1.5594, Accuray Top5: 84.005%, Time:45144.90060591698
Train: Epoch [62/100], Step [100/474], Loss: 2.2873, Accuray: 0.453125, Time:45256.52103638649
Train: Epoch [62/100], Step [200/474], Loss: 2.1074, Accuray: 0.4609375, Time:45368.388887643814
Train: Epoch [62/100], Step [300/474], Loss: 1.8510, Accuray: 0.5546875, Time:45480.800854444504
Train: Epoch [62/100], Step [400/474], Loss: 2.1365, Accuray: 0.5234375, Time:45594.508204460144
Test: Epoch [62/100], Loss: 1.5378, Accuray Top5: 84.452%, Time:45844.94276881218
Train: Epoch [63/100], Step [100/474], Loss: 1.8966, Accuray: 0.546875, Time:45958.4521689415
Train: Epoch [63/100], Step [200/474], Loss: 2.2175, Accuray: 0.4375, Time:46071.47798037529
Train: Epoch [63/100], Step [300/474], Loss: 2.0436, Accuray: 0.4921875, Time:46183.69752240181
Train: Epoch [63/100], Step [400/474], Loss: 2.0490, Accuray: 0.5078125, Time:46295.88715958595
Test: Epoch [63/100], Loss: 1.5524, Accuray Top5: 84.251%, Time:46543.02051949501
Train: Epoch [64/100], Step [100/474], Loss: 2.3071, Accuray: 0.4375, Time:46655.096057891846
Train: Epoch [64/100], Step [200/474], Loss: 1.6621, Accuray: 0.5859375, Time:46767.177260160446
Train: Epoch [64/100], Step [300/474], Loss: 1.9458, Accuray: 0.5, Time:46879.111087560654
Train: Epoch [64/100], Step [400/474], Loss: 1.8946, Accuray: 0.5390625, Time:46990.37363100052
Test: Epoch [64/100], Loss: 1.5523, Accuray Top5: 84.035%, Time:47235.83550477028
Train: Epoch [65/100], Step [100/474], Loss: 2.1790, Accuray: 0.4765625, Time:47347.68172240257
Train: Epoch [65/100], Step [200/474], Loss: 2.4606, Accuray: 0.421875, Time:47459.36817193031
Train: Epoch [65/100], Step [300/474], Loss: 1.9845, Accuray: 0.484375, Time:47570.688000917435
Train: Epoch [65/100], Step [400/474], Loss: 1.9866, Accuray: 0.4453125, Time:47681.88188791275
Test: Epoch [65/100], Loss: 1.5291, Accuray Top5: 84.384%, Time:47926.691094875336
Train: Epoch [66/100], Step [100/474], Loss: 2.0369, Accuray: 0.484375, Time:48038.11135721207
Train: Epoch [66/100], Step [200/474], Loss: 2.1730, Accuray: 0.453125, Time:48150.30791640282
Train: Epoch [66/100], Step [300/474], Loss: 2.1540, Accuray: 0.4375, Time:48262.11685466766
Train: Epoch [66/100], Step [400/474], Loss: 2.1736, Accuray: 0.46875, Time:48374.55590677261
Test: Epoch [66/100], Loss: 1.5401, Accuray Top5: 84.132%, Time:48621.02256011963
Train: Epoch [67/100], Step [100/474], Loss: 1.8159, Accuray: 0.53125, Time:48732.90672373772
Train: Epoch [67/100], Step [200/474], Loss: 2.1073, Accuray: 0.4609375, Time:48844.287920475006
Train: Epoch [67/100], Step [300/474], Loss: 2.1869, Accuray: 0.453125, Time:48955.31909823418
Train: Epoch [67/100], Step [400/474], Loss: 2.0191, Accuray: 0.5703125, Time:49067.65460014343
Test: Epoch [67/100], Loss: 1.5198, Accuray Top5: 84.503%, Time:49314.11908745766
Train: Epoch [68/100], Step [100/474], Loss: 1.8585, Accuray: 0.515625, Time:49426.255719423294
Train: Epoch [68/100], Step [200/474], Loss: 2.2713, Accuray: 0.421875, Time:49539.114632606506
Train: Epoch [68/100], Step [300/474], Loss: 2.1385, Accuray: 0.484375, Time:49651.13024902344
Train: Epoch [68/100], Step [400/474], Loss: 2.2477, Accuray: 0.390625, Time:49762.985454559326
Test: Epoch [68/100], Loss: 1.5655, Accuray Top5: 84.116%, Time:50008.533529281616
Train: Epoch [69/100], Step [100/474], Loss: 1.9998, Accuray: 0.515625, Time:50119.77994513512
Train: Epoch [69/100], Step [200/474], Loss: 1.9977, Accuray: 0.4609375, Time:50231.047981739044
Train: Epoch [69/100], Step [300/474], Loss: 2.2343, Accuray: 0.40625, Time:50342.44116973877
Train: Epoch [69/100], Step [400/474], Loss: 1.8331, Accuray: 0.5625, Time:50453.70248103142
Test: Epoch [69/100], Loss: 1.5505, Accuray Top5: 83.973%, Time:50698.47202324867
Train: Epoch [70/100], Step [100/474], Loss: 1.8164, Accuray: 0.546875, Time:50808.34102725983
Train: Epoch [70/100], Step [200/474], Loss: 1.8985, Accuray: 0.515625, Time:50918.771512031555
Train: Epoch [70/100], Step [300/474], Loss: 2.0207, Accuray: 0.484375, Time:51029.751608371735
Train: Epoch [70/100], Step [400/474], Loss: 2.2550, Accuray: 0.4453125, Time:51140.463757276535
Test: Epoch [70/100], Loss: 1.5715, Accuray Top5: 84.159%, Time:51385.83610677719
Train: Epoch [71/100], Step [100/474], Loss: 1.9160, Accuray: 0.5234375, Time:51499.17638707161
Train: Epoch [71/100], Step [200/474], Loss: 2.1947, Accuray: 0.4296875, Time:51613.492225170135
Train: Epoch [71/100], Step [300/474], Loss: 1.8339, Accuray: 0.5, Time:51727.089869976044
Train: Epoch [71/100], Step [400/474], Loss: 1.9813, Accuray: 0.5078125, Time:51841.53798985481
Test: Epoch [71/100], Loss: 1.5405, Accuray Top5: 84.220%, Time:52092.5635406971
Train: Epoch [72/100], Step [100/474], Loss: 2.3046, Accuray: 0.4609375, Time:52206.706082582474
Train: Epoch [72/100], Step [200/474], Loss: 2.0942, Accuray: 0.4453125, Time:52320.281158447266
Train: Epoch [72/100], Step [300/474], Loss: 1.9534, Accuray: 0.5390625, Time:52434.623876810074
Train: Epoch [72/100], Step [400/474], Loss: 1.9673, Accuray: 0.5078125, Time:52548.488792181015
Test: Epoch [72/100], Loss: 1.5547, Accuray Top5: 84.137%, Time:52799.366473436356
Train: Epoch [73/100], Step [100/474], Loss: 2.3106, Accuray: 0.453125, Time:52913.853283405304
Train: Epoch [73/100], Step [200/474], Loss: 1.8542, Accuray: 0.53125, Time:53027.6520447731
Train: Epoch [73/100], Step [300/474], Loss: 1.8605, Accuray: 0.5625, Time:53140.62022805214
Train: Epoch [73/100], Step [400/474], Loss: 2.0485, Accuray: 0.453125, Time:53253.18365955353
Test: Epoch [73/100], Loss: 1.5507, Accuray Top5: 84.264%, Time:53500.62557601929
Train: Epoch [74/100], Step [100/474], Loss: 2.0771, Accuray: 0.453125, Time:53612.96114873886
Train: Epoch [74/100], Step [200/474], Loss: 2.0293, Accuray: 0.484375, Time:53724.94324040413
Train: Epoch [74/100], Step [300/474], Loss: 2.0406, Accuray: 0.515625, Time:53836.54557442665
Train: Epoch [74/100], Step [400/474], Loss: 2.1807, Accuray: 0.3984375, Time:53948.06725716591
Test: Epoch [74/100], Loss: 1.5486, Accuray Top5: 84.140%, Time:54194.004207611084
Train: Epoch [75/100], Step [100/474], Loss: 2.0168, Accuray: 0.4921875, Time:54306.03158545494
Train: Epoch [75/100], Step [200/474], Loss: 2.4516, Accuray: 0.4296875, Time:54417.792036771774
Train: Epoch [75/100], Step [300/474], Loss: 2.0674, Accuray: 0.5, Time:54529.62415385246
Train: Epoch [75/100], Step [400/474], Loss: 1.9844, Accuray: 0.4921875, Time:54641.66813611984
Test: Epoch [75/100], Loss: 1.5296, Accuray Top5: 84.502%, Time:54887.579416036606
Train: Epoch [76/100], Step [100/474], Loss: 2.0894, Accuray: 0.4375, Time:55000.283561468124
Train: Epoch [76/100], Step [200/474], Loss: 2.2070, Accuray: 0.4296875, Time:55112.254207372665
Train: Epoch [76/100], Step [300/474], Loss: 1.8674, Accuray: 0.5078125, Time:55224.51421427727
Train: Epoch [76/100], Step [400/474], Loss: 1.9781, Accuray: 0.4765625, Time:55336.07882499695
Test: Epoch [76/100], Loss: 1.5434, Accuray Top5: 84.275%, Time:55582.24361753464
Train: Epoch [77/100], Step [100/474], Loss: 2.1344, Accuray: 0.4375, Time:55693.74483537674
Train: Epoch [77/100], Step [200/474], Loss: 2.2581, Accuray: 0.4140625, Time:55805.75948548317
Train: Epoch [77/100], Step [300/474], Loss: 1.9317, Accuray: 0.515625, Time:55918.08954501152
Train: Epoch [77/100], Step [400/474], Loss: 1.8889, Accuray: 0.4765625, Time:56030.052839279175
Test: Epoch [77/100], Loss: 1.5576, Accuray Top5: 84.097%, Time:56276.117950201035
Train: Epoch [78/100], Step [100/474], Loss: 2.1644, Accuray: 0.4609375, Time:56387.829013347626
Train: Epoch [78/100], Step [200/474], Loss: 2.1859, Accuray: 0.4453125, Time:56499.81430745125
Train: Epoch [78/100], Step [300/474], Loss: 2.3854, Accuray: 0.421875, Time:56611.83060145378
Train: Epoch [78/100], Step [400/474], Loss: 1.8199, Accuray: 0.53125, Time:56722.9287238121
Test: Epoch [78/100], Loss: 1.5479, Accuray Top5: 84.133%, Time:56968.06892371178
Train: Epoch [79/100], Step [100/474], Loss: 2.1259, Accuray: 0.46875, Time:57080.05714774132
Train: Epoch [79/100], Step [200/474], Loss: 2.1891, Accuray: 0.484375, Time:57192.180884838104
Train: Epoch [79/100], Step [300/474], Loss: 1.8937, Accuray: 0.5234375, Time:57304.68445253372
Train: Epoch [79/100], Step [400/474], Loss: 2.2564, Accuray: 0.46875, Time:57417.722873687744
Test: Epoch [79/100], Loss: 1.5360, Accuray Top5: 84.281%, Time:57666.440616607666
Train: Epoch [80/100], Step [100/474], Loss: 2.0042, Accuray: 0.5546875, Time:57779.87931370735
Train: Epoch [80/100], Step [200/474], Loss: 2.0343, Accuray: 0.484375, Time:57893.591576099396
Train: Epoch [80/100], Step [300/474], Loss: 2.1983, Accuray: 0.4296875, Time:58008.28239274025
Train: Epoch [80/100], Step [400/474], Loss: 2.0936, Accuray: 0.5, Time:58124.15678524971
Test: Epoch [80/100], Loss: 1.5497, Accuray Top5: 84.232%, Time:58379.62798333168
Train: Epoch [81/100], Step [100/474], Loss: 1.9480, Accuray: 0.453125, Time:58495.752026081085
Train: Epoch [81/100], Step [200/474], Loss: 2.1009, Accuray: 0.4609375, Time:58613.162445783615
Train: Epoch [81/100], Step [300/474], Loss: 2.0837, Accuray: 0.484375, Time:58731.52855038643
Train: Epoch [81/100], Step [400/474], Loss: 1.9822, Accuray: 0.46875, Time:58848.731373786926
Test: Epoch [81/100], Loss: 1.5650, Accuray Top5: 84.202%, Time:59104.69765043259
Train: Epoch [82/100], Step [100/474], Loss: 2.2171, Accuray: 0.484375, Time:59220.81744599342
Train: Epoch [82/100], Step [200/474], Loss: 1.9691, Accuray: 0.5078125, Time:59336.70530843735
Train: Epoch [82/100], Step [300/474], Loss: 2.4214, Accuray: 0.4140625, Time:59451.891515254974
Train: Epoch [82/100], Step [400/474], Loss: 2.0857, Accuray: 0.484375, Time:59566.21063184738
Test: Epoch [82/100], Loss: 1.5585, Accuray Top5: 84.110%, Time:59815.164843797684
Train: Epoch [83/100], Step [100/474], Loss: 1.7989, Accuray: 0.546875, Time:59927.82256221771
Train: Epoch [83/100], Step [200/474], Loss: 1.9196, Accuray: 0.484375, Time:60040.06000113487
Train: Epoch [83/100], Step [300/474], Loss: 2.0167, Accuray: 0.5234375, Time:60152.74048805237
Train: Epoch [83/100], Step [400/474], Loss: 2.1545, Accuray: 0.4921875, Time:60265.230528116226
Test: Epoch [83/100], Loss: 1.5482, Accuray Top5: 84.427%, Time:60512.97051167488
Train: Epoch [84/100], Step [100/474], Loss: 2.0041, Accuray: 0.5234375, Time:60625.46621823311
Train: Epoch [84/100], Step [200/474], Loss: 2.1904, Accuray: 0.4609375, Time:60737.93963456154
Train: Epoch [84/100], Step [300/474], Loss: 2.2547, Accuray: 0.453125, Time:60850.131779670715
Train: Epoch [84/100], Step [400/474], Loss: 2.2696, Accuray: 0.453125, Time:60962.508825302124
Test: Epoch [84/100], Loss: 1.5396, Accuray Top5: 84.098%, Time:61210.46241211891
Train: Epoch [85/100], Step [100/474], Loss: 1.9357, Accuray: 0.5546875, Time:61324.003653526306
Train: Epoch [85/100], Step [200/474], Loss: 2.2011, Accuray: 0.4765625, Time:61437.44630026817
Train: Epoch [85/100], Step [300/474], Loss: 2.2191, Accuray: 0.4609375, Time:61550.38420367241
Train: Epoch [85/100], Step [400/474], Loss: 2.1403, Accuray: 0.4453125, Time:61663.61207175255
Test: Epoch [85/100], Loss: 1.5373, Accuray Top5: 84.195%, Time:61913.465322732925
Train: Epoch [86/100], Step [100/474], Loss: 1.9389, Accuray: 0.578125, Time:62027.18660616875
Train: Epoch [86/100], Step [200/474], Loss: 2.0323, Accuray: 0.5, Time:62140.6527569294
Train: Epoch [86/100], Step [300/474], Loss: 2.1003, Accuray: 0.5, Time:62254.17345380783
Train: Epoch [86/100], Step [400/474], Loss: 1.9707, Accuray: 0.53125, Time:62367.397153139114
Test: Epoch [86/100], Loss: 1.5417, Accuray Top5: 84.575%, Time:62614.91524744034
Train: Epoch [87/100], Step [100/474], Loss: 1.7820, Accuray: 0.59375, Time:62727.759536743164
Train: Epoch [87/100], Step [200/474], Loss: 2.1683, Accuray: 0.484375, Time:62840.893815755844
Train: Epoch [87/100], Step [300/474], Loss: 1.9548, Accuray: 0.484375, Time:62953.91754460335
Train: Epoch [87/100], Step [400/474], Loss: 2.1195, Accuray: 0.484375, Time:63067.558069229126
Test: Epoch [87/100], Loss: 1.5645, Accuray Top5: 84.233%, Time:63321.3933596611
Train: Epoch [88/100], Step [100/474], Loss: 1.9120, Accuray: 0.46875, Time:63442.00905132294
Train: Epoch [88/100], Step [200/474], Loss: 2.1395, Accuray: 0.453125, Time:63562.0526702404
Train: Epoch [88/100], Step [300/474], Loss: 1.8541, Accuray: 0.5, Time:63682.18285536766
Train: Epoch [88/100], Step [400/474], Loss: 2.0166, Accuray: 0.484375, Time:63801.21332883835
Test: Epoch [88/100], Loss: 1.5445, Accuray Top5: 84.182%, Time:64061.488812208176
Train: Epoch [89/100], Step [100/474], Loss: 2.0394, Accuray: 0.5078125, Time:64181.12900972366
Train: Epoch [89/100], Step [200/474], Loss: 2.0278, Accuray: 0.453125, Time:64300.95904326439
Train: Epoch [89/100], Step [300/474], Loss: 2.0570, Accuray: 0.4765625, Time:64419.808411598206
Train: Epoch [89/100], Step [400/474], Loss: 1.9011, Accuray: 0.5390625, Time:64538.60167360306
Test: Epoch [89/100], Loss: 1.5287, Accuray Top5: 84.706%, Time:64799.82236909866
Train: Epoch [90/100], Step [100/474], Loss: 1.8845, Accuray: 0.5078125, Time:64918.64334774017
Train: Epoch [90/100], Step [200/474], Loss: 2.1139, Accuray: 0.453125, Time:65036.392708063126
Train: Epoch [90/100], Step [300/474], Loss: 2.0196, Accuray: 0.4453125, Time:65153.50872206688
Train: Epoch [90/100], Step [400/474], Loss: 1.9257, Accuray: 0.5546875, Time:65270.405084848404
Test: Epoch [90/100], Loss: 1.5422, Accuray Top5: 84.469%, Time:65527.09473609924
Train: Epoch [91/100], Step [100/474], Loss: 2.0739, Accuray: 0.46875, Time:65642.91249632835
Train: Epoch [91/100], Step [200/474], Loss: 2.1518, Accuray: 0.4765625, Time:65758.75939536095
Train: Epoch [91/100], Step [300/474], Loss: 2.1157, Accuray: 0.46875, Time:65874.57527804375
Train: Epoch [91/100], Step [400/474], Loss: 2.0264, Accuray: 0.4296875, Time:65991.7126801014
Test: Epoch [91/100], Loss: 1.5410, Accuray Top5: 84.469%, Time:66247.33048582077
Train: Epoch [92/100], Step [100/474], Loss: 2.0971, Accuray: 0.5, Time:66362.83413243294
Train: Epoch [92/100], Step [200/474], Loss: 2.1166, Accuray: 0.5, Time:66478.5843846798
Train: Epoch [92/100], Step [300/474], Loss: 2.0527, Accuray: 0.5078125, Time:66594.59516978264
Train: Epoch [92/100], Step [400/474], Loss: 2.0024, Accuray: 0.453125, Time:66711.07969784737
Test: Epoch [92/100], Loss: 1.5451, Accuray Top5: 84.424%, Time:66967.21227312088
Train: Epoch [93/100], Step [100/474], Loss: 2.1659, Accuray: 0.4140625, Time:67083.83527827263
Train: Epoch [93/100], Step [200/474], Loss: 1.9222, Accuray: 0.53125, Time:67202.28112602234
Train: Epoch [93/100], Step [300/474], Loss: 1.8600, Accuray: 0.515625, Time:67317.03545689583
Train: Epoch [93/100], Step [400/474], Loss: 1.8615, Accuray: 0.546875, Time:67431.70088005066
Test: Epoch [93/100], Loss: 1.5530, Accuray Top5: 84.193%, Time:67683.91418457031
Train: Epoch [94/100], Step [100/474], Loss: 2.0152, Accuray: 0.4765625, Time:67798.11846804619
Train: Epoch [94/100], Step [200/474], Loss: 2.4537, Accuray: 0.3828125, Time:67912.06354808807
Train: Epoch [94/100], Step [300/474], Loss: 1.8891, Accuray: 0.4609375, Time:68026.04016208649
Train: Epoch [94/100], Step [400/474], Loss: 1.9794, Accuray: 0.484375, Time:68139.91571164131
Test: Epoch [94/100], Loss: 1.5621, Accuray Top5: 84.231%, Time:68388.39507675171
Train: Epoch [95/100], Step [100/474], Loss: 2.0512, Accuray: 0.484375, Time:68501.67758369446
Train: Epoch [95/100], Step [200/474], Loss: 2.0102, Accuray: 0.4921875, Time:68615.1642549038
Train: Epoch [95/100], Step [300/474], Loss: 2.0560, Accuray: 0.453125, Time:68728.1279911995
Train: Epoch [95/100], Step [400/474], Loss: 1.8442, Accuray: 0.5390625, Time:68841.44136476517
Test: Epoch [95/100], Loss: 1.5693, Accuray Top5: 84.053%, Time:69089.336769104
Train: Epoch [96/100], Step [100/474], Loss: 2.0699, Accuray: 0.421875, Time:69201.28339743614
Train: Epoch [96/100], Step [200/474], Loss: 1.9712, Accuray: 0.4921875, Time:69312.4863884449
Train: Epoch [96/100], Step [300/474], Loss: 1.8551, Accuray: 0.5625, Time:69423.48198342323
Train: Epoch [96/100], Step [400/474], Loss: 2.5842, Accuray: 0.421875, Time:69535.10225176811
Test: Epoch [96/100], Loss: 1.5352, Accuray Top5: 84.254%, Time:69778.99824476242
Train: Epoch [97/100], Step [100/474], Loss: 2.1460, Accuray: 0.4609375, Time:69890.30219364166
Train: Epoch [97/100], Step [200/474], Loss: 2.0327, Accuray: 0.5234375, Time:70001.19600462914
Train: Epoch [97/100], Step [300/474], Loss: 2.4997, Accuray: 0.390625, Time:70112.27918815613
Train: Epoch [97/100], Step [400/474], Loss: 1.6756, Accuray: 0.59375, Time:70223.07345032692
Test: Epoch [97/100], Loss: 1.5343, Accuray Top5: 84.614%, Time:70467.16943192482
Train: Epoch [98/100], Step [100/474], Loss: 2.0119, Accuray: 0.53125, Time:70578.2054605484
Train: Epoch [98/100], Step [200/474], Loss: 1.7770, Accuray: 0.578125, Time:70689.10548114777
Train: Epoch [98/100], Step [300/474], Loss: 2.0845, Accuray: 0.5546875, Time:70800.02456021309
Train: Epoch [98/100], Step [400/474], Loss: 1.9110, Accuray: 0.4453125, Time:70911.1596326828
Test: Epoch [98/100], Loss: 1.5658, Accuray Top5: 83.808%, Time:71155.3326690197
Train: Epoch [99/100], Step [100/474], Loss: 2.1110, Accuray: 0.484375, Time:71266.24795937538
Train: Epoch [99/100], Step [200/474], Loss: 1.8407, Accuray: 0.5546875, Time:71376.83968544006
Train: Epoch [99/100], Step [300/474], Loss: 1.9315, Accuray: 0.4921875, Time:71488.76046204567
Train: Epoch [99/100], Step [400/474], Loss: 2.3544, Accuray: 0.3984375, Time:71599.51326322556
Test: Epoch [99/100], Loss: 1.5518, Accuray Top5: 84.312%, Time:71843.6531715393
Train: Epoch [100/100], Step [100/474], Loss: 2.2327, Accuray: 0.453125, Time:71954.79863262177
Train: Epoch [100/100], Step [200/474], Loss: 2.1276, Accuray: 0.46875, Time:72065.77073431015
Train: Epoch [100/100], Step [300/474], Loss: 2.0737, Accuray: 0.53125, Time:72176.66974329948
Train: Epoch [100/100], Step [400/474], Loss: 1.7872, Accuray: 0.5703125, Time:72287.86125087738
Test: Epoch [100/100], Loss: 1.5535, Accuray Top5: 84.157%, Time:72531.56107902527

4.1 Save Model

Here we always save the most recent model.

In [10]:
torch.save(model.state_dict(), 'models/temp/food101_{}.pth'.format(time.time()))

4.2 Load Model

In [11]:
# Most recent model: food101_model01/.pth 是最新模型
# model.load_state_dict(torch.load('models/food101_model01.pth', map_location=device)

# Model with lowest loss: food101_less_loss.pth 是当前拥有最低loss的模型
model.load_state_dict(torch.load('models/food101_less_loss.pth', map_location=device))
Out[11]:
<All keys matched successfully>

5 Test Model

In [14]:
model.eval()
start = time.time()

test_loss_min = np.Inf
test_loss=0
test_accuracy=0
with torch.no_grad():
    for i, data in enumerate(test_loader):
        # if i == 30: break

        images, labels = data[0].to(device), data[1].to(device)
        
        # Forward
        outputs = model(images)
        batch_loss = criterion(outputs, labels)
        test_loss += batch_loss.item()

        # Topk
        top_p, top_class = outputs.topk(5, dim=1)
        labels = labels.view(-1,1)        #  [n] 转为 [n, 1] , 以便与 [n, 5] 做比较    
        
        # Predict correctly?
        results =  top_class == labels # tensor([[False, False, False,  True, False],  [False,  True, False, False, False], ....)
        results = results.sum(1)          # tensor([1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1]), 每一行只要有一个为True就返回1, dim=1表示对维度1求和
        
        # Avg accuracy / Loss
        test_accuracy += torch.mean(results.float()).item()
        avg_test_accuracy = test_accuracy/(i+1)
        avg_test_loss = test_loss/(i+1)
        
        time_cost = time.time()-start
        print ('Test: Step [{}/{}], Loss: {:.4f}, Accuray Top5: {:.3%}, Time:{}'.format(i+1, len(test_loader), avg_test_loss, avg_test_accuracy, time_cost))

# End testing
model.train()
print('Finish Testing')
Test: Step [316/316], Loss: {1.0986873224}, Accuray Top5: {93.944%}, Time:{148.238746823}
Finish Testing

5 Case Test

In [15]:
from PIL import Image
# list of class names by index, i.e. a name can be accessed like class_names[0]
#class_names = [item[4:] for item in train_data.classes]


def predict(img_path, topk=5):
    """
    给定一张图的路径, 对其类别进行预测
    given a img, predict its class.
    """
    model.eval()
    
    # preprocess img. 按指定格式对图片进行压缩处理
    img = Image.open(img_path)
    img = test_transforms(img)

    # img: [3, 224, 224]
    # 因为model接受batch的维度是[n, 3, 224, 224], 所以要在最前面增加一维度
    img = torch.unsqueeze(img, 0)
    
    # Predict top5
    outputs = torch.exp(model(img.to(device)))
    top_p, top_class = outputs.topk(topk, dim=1)

    pred = top_class.data.cpu().numpy()[0]
    results = [train_data.classes[i] for i in pred]
    return results
In [78]:
diy_path = os.path.join(data_path, 'test/apple_pie')
for img_name in os.listdir(diy_path):
    if '.DS_Store' in img_name: continue
    img_path = os.path.join(diy_path, img_name)
    img = cv2.imread(img_path)
    plt.imshow(img[...,::-1])    # bgr 转为 rgb
    plt.xticks([])
    plt.yticks([])
    plt.show()

    results = predict(img_path)
    print('Predict:', results)
Predict:\<b\>a\<b\/\> ['waffles', 'apple_pie', 'gyoza', 'pancakes', 'baklava']
Predict:\<b\>a\<b\/\> ['carrot_cake', 'chocolate_cake', 'frozen_yogurt', 'red_velvet_cake', 'apple_pie']
Predict:\<b\>a\<b\/\> ['apple_pie', 'carrot_cake', 'bread_pudding', 'baklava', 'chicken_curry']
Predict:\<b\>a\<b\/\> ['poutine', 'fried_calamari', 'macaroni_and_cheese', 'gnocchi', 'scallops']
Predict:\<b\>a\<b\/\> ['french_toast', 'grilled_cheese_sandwich', 'omelette', 'pancakes', 'pork_chop']
Predict:\<b\>a\<b\/\> ['apple_pie', 'grilled_cheese_sandwich', 'french_toast', 'waffles', 'garlic_bread']
Predict:\<b\>a\<b\/\> ['apple_pie', 'bread_pudding', 'gyoza', 'samosa', 'peking_duck']
Predict:\<b\>a\<b\/\> ['apple_pie', 'bread_pudding', 'gnocchi', 'waffles', 'macaroni_and_cheese']
Predict:\<b\>a\<b\/\> ['pancakes', 'french_toast', 'peking_duck', 'hamburger', 'beignets']
Predict:\<b\>a\<b\/\> ['cannoli', 'frozen_yogurt', 'falafel', 'samosa', 'escargots']
Predict:\<b\>a\<b\/\> ['apple_pie', 'pancakes', 'creme_brulee', 'donuts', 'beignets']
Predict:\<b\>a\<b\/\> ['apple_pie', 'baklava', 'peking_duck', 'gyoza', 'pancakes']
Predict:\<b\>a\<b\/\> ['tiramisu', 'chocolate_mousse', 'apple_pie', 'bread_pudding', 'cannoli']
Predict:\<b\>a\<b\/\> ['apple_pie', 'pancakes', 'waffles', 'carrot_cake', 'bread_pudding']
Predict:\<b\>a\<b\/\> ['gnocchi', 'carrot_cake', 'macaroni_and_cheese', 'apple_pie', 'risotto']
Predict:\<b\>a\<b\/\> ['bread_pudding', 'apple_pie', 'lasagna', 'tiramisu', 'gnocchi']
Predict:\<b\>a\<b\/\> ['breakfast_burrito', 'samosa', 'apple_pie', 'bread_pudding', 'baklava']
Predict:\<b\>a\<b\/\> ['apple_pie', 'hamburger', 'baklava', 'pancakes', 'garlic_bread']
Predict:\<b\>a\<b\/\> ['beignets', 'cannoli', 'bread_pudding', 'waffles', 'french_toast']
Predict:\<b\>a\<b\/\> ['tiramisu', 'chocolate_cake', 'chocolate_mousse', 'carrot_cake', 'cheesecake']
Predict:\<b\>a\<b\/\> ['beignets', 'macarons', 'apple_pie', 'donuts', 'pancakes']
Predict:\<b\>a\<b\/\> ['apple_pie', 'carrot_cake', 'baklava', 'bread_pudding', 'lasagna']
Predict:\<b\>a\<b\/\> ['takoyaki', 'chocolate_cake', 'peking_duck', 'apple_pie', 'foie_gras']
Predict:\<b\>a\<b\/\> ['waffles', 'grilled_cheese_sandwich', 'baklava', 'hamburger', 'club_sandwich']
Predict:\<b\>a\<b\/\> ['panna_cotta', 'waffles', 'cannoli', 'chocolate_cake', 'creme_brulee']
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-78-f00fe65a40fe> in <module>
      7     plt.xticks([])
      8     plt.yticks([])
----> 9     plt.show()
     10 
     11     results = predict(img_path)

/opt/conda/lib/python3.7/site-packages/matplotlib/pyplot.py in show(*args, **kw)
    270     """
    271     global _show
--> 272     return _show(*args, **kw)
    273 
    274 

/opt/conda/lib/python3.7/site-packages/ipykernel/pylab/backend_inline.py in show(close, block)
     41             display(
     42                 figure_manager.canvas.figure,
---> 43                 metadata=_fetch_figure_metadata(figure_manager.canvas.figure)
     44             )
     45     finally:

/opt/conda/lib/python3.7/site-packages/IPython/core/display.py in display(include, exclude, metadata, transient, display_id, *objs, **kwargs)
    311             publish_display_data(data=obj, metadata=metadata, **kwargs)
    312         else:
--> 313             format_dict, md_dict = format(obj, include=include, exclude=exclude)
    314             if not format_dict:
    315                 # nothing to display (e.g. _ipython_display_ took over)

/opt/conda/lib/python3.7/site-packages/IPython/core/formatters.py in format(self, obj, include, exclude)
    178             md = None
    179             try:
--> 180                 data = formatter(obj)
    181             except:
    182                 # FIXME: log the exception

<decorator-gen-9> in __call__(self, obj)

/opt/conda/lib/python3.7/site-packages/IPython/core/formatters.py in catch_format_error(method, self, *args, **kwargs)
    222     """show traceback on failed format call"""
    223     try:
--> 224         r = method(self, *args, **kwargs)
    225     except NotImplementedError:
    226         # don't warn on NotImplementedErrors

/opt/conda/lib/python3.7/site-packages/IPython/core/formatters.py in __call__(self, obj)
    339                 pass
    340             else:
--> 341                 return printer(obj)
    342             # Finally look for special method names
    343             method = get_real_method(obj, self.print_method)

/opt/conda/lib/python3.7/site-packages/IPython/core/pylabtools.py in <lambda>(fig)
    248         png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
    249     if 'retina' in formats or 'png2x' in formats:
--> 250         png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
    251     if 'jpg' in formats or 'jpeg' in formats:
    252         jpg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'jpg', **kwargs))

/opt/conda/lib/python3.7/site-packages/IPython/core/pylabtools.py in retina_figure(fig, **kwargs)
    138 def retina_figure(fig, **kwargs):
    139     """format a figure as a pixel-doubled (retina) PNG"""
--> 140     pngdata = print_figure(fig, fmt='retina', **kwargs)
    141     # Make sure that retina_figure acts just like print_figure and returns
    142     # None when the figure is empty.

/opt/conda/lib/python3.7/site-packages/IPython/core/pylabtools.py in print_figure(fig, fmt, bbox_inches, **kwargs)
    130         FigureCanvasBase(fig)
    131 
--> 132     fig.canvas.print_figure(bytes_io, **kw)
    133     data = bytes_io.getvalue()
    134     if fmt == 'svg':

/opt/conda/lib/python3.7/site-packages/matplotlib/backend_bases.py in print_figure(self, filename, dpi, facecolor, edgecolor, orientation, format, bbox_inches, **kwargs)
   2080                     bbox_artists = kwargs.pop("bbox_extra_artists", None)
   2081                     bbox_inches = self.figure.get_tightbbox(renderer,
-> 2082                             bbox_extra_artists=bbox_artists)
   2083                     pad = kwargs.pop("pad_inches", None)
   2084                     if pad is None:

/opt/conda/lib/python3.7/site-packages/matplotlib/figure.py in get_tightbbox(self, renderer, bbox_extra_artists)
   2383 
   2384         for a in artists:
-> 2385             bbox = a.get_tightbbox(renderer)
   2386             if bbox is not None and (bbox.width != 0 or bbox.height != 0):
   2387                 bb.append(bbox)

/opt/conda/lib/python3.7/site-packages/matplotlib/artist.py in get_tightbbox(self, renderer)
    280             The enclosing bounding box (in figure pixel co-ordinates).
    281         """
--> 282         bbox = self.get_window_extent(renderer)
    283         if self.get_clip_on():
    284             clip_box = self.get_clip_box()

/opt/conda/lib/python3.7/site-packages/matplotlib/spines.py in get_window_extent(self, renderer)
    213             bboxes.append(bb0)
    214 
--> 215         return mtransforms.Bbox.union(bboxes)
    216 
    217     def get_path(self):

/opt/conda/lib/python3.7/site-packages/matplotlib/transforms.py in union(bboxes)
    704             x1 = np.max([bbox.xmax for bbox in bboxes])
    705             y0 = np.min([bbox.ymin for bbox in bboxes])
--> 706             y1 = np.max([bbox.ymax for bbox in bboxes])
    707         return Bbox([[x0, y0], [x1, y1]])
    708 

/opt/conda/lib/python3.7/site-packages/matplotlib/transforms.py in <listcomp>(.0)
    704             x1 = np.max([bbox.xmax for bbox in bboxes])
    705             y0 = np.min([bbox.ymin for bbox in bboxes])
--> 706             y1 = np.max([bbox.ymax for bbox in bboxes])
    707         return Bbox([[x0, y0], [x1, y1]])
    708 

/opt/conda/lib/python3.7/site-packages/matplotlib/transforms.py in ymax(self)
    362     def ymax(self):
    363         """The top edge of the bounding box."""
--> 364         return np.max(self.get_points()[:, 1])
    365 
    366     @property

KeyboardInterrupt: 
In [ ]: